In [1]:
import matplotlib.pyplot as plt
import numpy as np
from IPython.display import clear_output
from qiskit import QuantumCircuit
from qiskit.circuit import ParameterVector
from qiskit.circuit.library import ZFeatureMap
from qiskit.quantum_info import SparsePauliOp
from qiskit_algorithms.optimizers import COBYLA
from qiskit_algorithms.utils import algorithm_globals
from qiskit_machine_learning.algorithms.classifiers import NeuralNetworkClassifier
from qiskit_machine_learning.neural_networks import EstimatorQNN
from sklearn.model_selection import train_test_split
from keras.models import Sequential

from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense
# Seed qiskit-algorithms' global RNG so training runs are reproducible.
algorithm_globals.random_seed = 12345

import os
import cv2
import tarfile
import numpy as np
from tensorflow.keras.utils import to_categorical
In [2]:
# Directory paths
# NOTE(review): hardcoded absolute Windows path — parameterize (env var or
# pathlib relative to a DATA_DIR) for portability.
base_dir = "C:\\Users\\Lakshya Singh\\Downloads\\ILSVRC2012"
# Per-class subdirectory paths.
# NOTE(review): these five *_dir variables are not referenced anywhere later
# in this notebook — load_and_preprocess_images works from base_dir and the
# per-class .tar archives directly.
German_Shepherd_dir = os.path.join(base_dir, 'German_Shepherd')
Border_Terrier_dir = os.path.join(base_dir, 'Border_Terrier')
Briard_dir = os.path.join(base_dir, 'Briard')
English_Foxhound_dir = os.path.join(base_dir, 'English_Foxhound')
Ibizan_Hound_dir = os.path.join(base_dir, 'Ibizan_Hound')
In [3]:
# Image parameters
# Target size every loaded image is resized to (224x224 is the conventional
# ImageNet input resolution).
IMG_HEIGHT = 224
IMG_WIDTH = 224

# Function to load and preprocess images
def load_and_preprocess_images(directory, img_height, img_width):
    """Load dog-breed images from per-class .tar archives under ``directory``.

    Each class archive (e.g. ``German_Shepherd.tar``) is extracted to a
    temporary folder, every readable image is resized to
    (img_width, img_height) and scaled to [0, 1], and the temporary folder
    is removed afterwards.

    Args:
        directory: Base directory containing one '<class_name>.tar' per class.
        img_height: Target image height in pixels.
        img_width: Target image width in pixels.

    Returns:
        Tuple ``(images, labels)`` where ``images`` is a float ndarray of
        shape (n_samples, img_height, img_width, 3) and ``labels`` is a
        plain list of integer class indices (see ``label_map`` below) —
        NOT one-hot/categorical, despite what an earlier comment claimed.
    """
    import shutil  # local import: only needed for temp-dir cleanup

    images = []
    labels = []

    # Mapping from class names to numerical labels (order defines the classes).
    label_map = {'German_Shepherd': 0, 'Border_Terrier': 1, 'Briard': 2, 'English_Foxhound': 3, 'Ibizan_Hound': 4}

    for class_name, class_label in label_map.items():
        tar_file_path = os.path.join(directory, class_name + '.tar')
        # Temporary directory for the extracted archive contents.
        temp_extract_dir = os.path.join(directory, 'temp_' + class_name)
        os.makedirs(temp_extract_dir, exist_ok=True)

        try:
            # NOTE(review): extractall on an untrusted archive is unsafe
            # (path traversal). These are local ImageNet tars, so acceptable
            # here, but do not reuse this on downloaded archives as-is.
            with tarfile.open(tar_file_path, 'r') as tar_ref:
                tar_ref.extractall(temp_extract_dir)

            for filename in os.listdir(temp_extract_dir):
                img_path = os.path.join(temp_extract_dir, filename)
                # Skip anything that is not a regular file (e.g. nested dirs).
                if not os.path.isfile(img_path):
                    continue
                # cv2.imread returns None for unreadable / non-image files.
                image = cv2.imread(img_path)
                if image is not None:
                    # Resize to the requested dimensions (cv2 takes (w, h)).
                    image = cv2.resize(image, (img_width, img_height))
                    # Normalize pixel values to the range [0, 1].
                    image = image / 255.0
                    images.append(image)
                    labels.append(class_label)
        finally:
            # Bug fix: extracted files were previously left on disk after
            # every run, silently accumulating in base_dir.
            shutil.rmtree(temp_extract_dir, ignore_errors=True)

    return np.array(images), labels


# Load the full dataset once; X is the image array, y a list of integer labels.
X, y = load_and_preprocess_images(base_dir, IMG_HEIGHT, IMG_WIDTH)
In [6]:
# Alias the loaded dataset for the split below.
images, labels = X,y

# 70/30 train/test split with a fixed seed for reproducibility.
# NOTE(review): no stratify= argument — class proportions may differ between
# the train and test splits; consider stratify=labels.
train_images, test_images, train_labels, test_labels = train_test_split(
    images, labels, test_size=0.3, random_state=42
)
In [17]:
def conv_circuit(params):
    """Two-qubit parameterized convolution unit (expects 6 parameters).

    Applies three layers of (RZ, RY, CX) with alternating CX direction,
    then a fixed RZ(pi/2) on each qubit.
    """
    qc = QuantumCircuit(2)

    # One tuple per layer: (rz qubit, ry qubit, cx control, cx target).
    layer_wiring = [
        (0, 1, 1, 0),  # Layer 1
        (1, 0, 0, 1),  # Layer 2
        (0, 1, 1, 0),  # Layer 3
    ]
    for layer, (rz_q, ry_q, ctrl, tgt) in enumerate(layer_wiring):
        qc.rz(params[2 * layer], rz_q)
        qc.ry(params[2 * layer + 1], ry_q)
        qc.cx(ctrl, tgt)

    # Layer 4: fixed-angle rotations on both qubits.
    for qubit in (0, 1):
        qc.rz(np.pi / 2, qubit)

    return qc

# Let's draw this circuit and see what it looks like
# Bind six symbolic parameters and render the two-qubit convolution unit.
params = ParameterVector("θ", length=6)
circuit = conv_circuit(params)
circuit.draw("mpl", style="clifford")
Out[17]:
In [18]:
def conv_layer(num_qubits, param_prefix):
    """Build one convolutional layer over ``num_qubits`` qubits.

    Applies the 6-parameter conv_circuit first to even/odd qubit pairs,
    then to odd/even pairs with a wrap-around to qubit 0, consuming
    ``num_qubits * 6`` fresh parameters named with ``param_prefix``.

    NOTE: the pairing scheme assumes an even ``num_qubits``.
    """
    qubits = list(range(num_qubits))
    params = ParameterVector(param_prefix, length=num_qubits * 6)

    layer = QuantumCircuit(num_qubits, name="Convolutional Layer")

    # First pass pairs (0,1), (2,3), ...; second pass pairs (1,2), (3,4), ...
    # with the last odd qubit wrapping around to qubit 0.
    even_pairs = list(zip(qubits[0::2], qubits[1::2]))
    odd_pairs = list(zip(qubits[1::2], qubits[2::2] + [qubits[0]]))

    param_index = 0
    for q1, q2 in even_pairs + odd_pairs:
        unit = conv_circuit(params[param_index : param_index + 6])
        layer = layer.compose(unit, [q1, q2])
        layer.barrier()
        param_index += 6

    # Wrap the whole layer as a single instruction on a fresh circuit so it
    # renders as one named box until decomposed.
    wrapper = QuantumCircuit(num_qubits)
    wrapper.append(layer.to_instruction(), qubits)
    return wrapper


# Build and render a 4-qubit convolutional layer (decompose to show the gates).
circuit = conv_layer(4, "θ")
circuit.decompose().draw("mpl", style="clifford")
Out[18]:
In [19]:
def pool_circuit(params):
    """Two-qubit parameterized pooling unit (expects 5 parameters).

    Three stages of rotations and CX gates with alternating direction;
    the extra third stage enhances the pooling expressivity.
    """
    qc = QuantumCircuit(2)

    # Ordered gate plan: (gate name, parameter index or None, qubit args).
    gate_plan = [
        ("rz", 0, (0,)),      # stage 1
        ("ry", 1, (1,)),
        ("cx", None, (1, 0)),
        ("rz", 2, (1,)),      # stage 2
        ("cx", None, (0, 1)),
        ("ry", 3, (0,)),      # stage 3
        ("rz", 4, (1,)),
        ("cx", None, (1, 0)),
    ]
    for gate, p_idx, qargs in gate_plan:
        if p_idx is None:
            getattr(qc, gate)(*qargs)
        else:
            getattr(qc, gate)(params[p_idx], *qargs)

    return qc

# Generate parameter vector with increased length
# The pooling unit consumes 5 parameters; render it for inspection.
params = ParameterVector("θ", length=5)
circuit = pool_circuit(params)
circuit.draw("mpl", style="clifford")
Out[19]:
In [20]:
def pool_layer(sources, sinks, param_prefix):
    """Build a pooling layer mapping each source qubit onto a sink qubit.

    One 5-parameter pool_circuit is applied per (source, sink) pair, using
    ``len(sources) * 5`` fresh parameters named with ``param_prefix``.
    Pairs are taken positionally; zip truncates if the lists differ in length.
    """
    num_qubits = len(sources) + len(sinks)
    params = ParameterVector(param_prefix, length=len(sources) * 5)

    layer = QuantumCircuit(num_qubits, name="Pooling Layer")
    for pair_index, (source, sink) in enumerate(zip(sources, sinks)):
        start = pair_index * 5
        layer = layer.compose(pool_circuit(params[start : start + 5]), [source, sink])
        layer.barrier()

    # Wrap as a single named instruction so the layer renders as one box.
    wrapper = QuantumCircuit(num_qubits)
    wrapper.append(layer.to_instruction(), range(num_qubits))
    return wrapper

# Example use with sources and sinks
# Pool qubits 0,1 (sources) into qubits 2,3 (sinks) on a 4-qubit register.
sources = [0, 1]
sinks = [2, 3]
circuit = pool_layer(sources, sinks, "θ")
circuit.decompose().draw("mpl", style="clifford")
Out[20]:
In [21]:
def plot_images(images, n, img_height, img_width):
    """Display up to ``n`` images in a near-square grid.

    Args:
        images: Sequence of image arrays (H, W, C) with values in [0, 1].
        n: Number of grid cells to allocate; cells without an image are hidden.
        img_height: Unused; kept for backward compatibility with callers.
        img_width: Unused; kept for backward compatibility with callers.
    """
    # Determine the number of rows and columns for a near-square grid.
    cols = int(np.ceil(np.sqrt(n)))
    rows = int(np.ceil(n / cols))

    # squeeze=False guarantees a 2-D axes array even when rows == cols == 1,
    # fixing a crash the old reshape-based handling had for n == 1
    # (plt.subplots returns a bare Axes there, which has no .reshape).
    fig, axes = plt.subplots(
        rows, cols, figsize=(cols * 4, rows * 4),
        subplot_kw={"xticks": [], "yticks": []}, squeeze=False,
    )

    # Iterate over every grid cell so trailing cells are hidden too
    # (the previous version left cells beyond n visible as empty boxes).
    for i in range(rows * cols):
        ax = axes[i // cols, i % cols]
        if i < min(n, len(images)):
            ax.imshow(images[i])
            ax.set_title(f'Image {i+1}', fontsize=10)
        else:
            ax.axis('off')

    plt.subplots_adjust(wspace=0.1, hspace=0.1)
    plt.show()


n_images_to_show = 20  # Number of images you want to display
# Preview a sample of the training images.
plot_images(train_images, n_images_to_show, IMG_HEIGHT, IMG_WIDTH)
In [22]:
# Transforming the feature map
# 8-qubit ZFeatureMap: encodes 8 classical features (one per qubit), 2 reps.
# Must match the 8 features selected from each image later in the notebook.
feature_map = ZFeatureMap(feature_dimension=8, reps=2)
circuit = feature_map.decompose()
circuit.draw("mpl", style="clifford")
Out[22]:
In [23]:
# QCNN ansatz: alternating convolution/pooling; each pooling step narrows the
# active register 8 -> 4 -> 2 -> 1, with only the sink qubits carrying on.
ansatz = QuantumCircuit(8, name="Ansatz")

# First Convolutional Layer
ansatz.compose(conv_layer(8, "c1"), list(range(8)), inplace=True)

# First Pooling Layer
ansatz.compose(pool_layer([0, 1, 2, 3], [4, 5, 6, 7], "p1"), list(range(8)), inplace=True)

# Second Convolutional Layer
ansatz.compose(conv_layer(4, "c2"), list(range(4, 8)), inplace=True)

# Second Pooling Layer
ansatz.compose(pool_layer([0, 1], [2, 3], "p2"), list(range(4, 8)), inplace=True)

# Third Convolutional Layer
ansatz.compose(conv_layer(2, "c3"), list(range(6, 8)), inplace=True)

# Third Pooling Layer
ansatz.compose(pool_layer([0], [1], "p3"), list(range(6, 8)), inplace=True)



# Combining the feature map and ansatz
circuit = QuantumCircuit(8)
circuit.compose(feature_map, range(8), inplace=True)
circuit.compose(ansatz, range(8), inplace=True)

# Define multiple observables for multi-class classification
# One single-qubit Z observable per class (5 classes). Qiskit Pauli strings
# are little-endian: the leftmost character acts on the highest-index qubit,
# so "Z" + "I"*7 measures qubit 7 (the final pooling sink).
# NOTE(review): only qubit 7 holds the fully pooled state; the remaining
# observables read qubits 6..3, which are only partially pooled — confirm
# this is the intended readout scheme.
observables = [
    SparsePauliOp.from_list([("Z" + "I" * 7, 1)]),
    SparsePauliOp.from_list([("I" + "Z" + "I" * 6, 1)]),
    SparsePauliOp.from_list([("I" * 2 + "Z" + "I" * 5, 1)]),
    SparsePauliOp.from_list([("I" * 3 + "Z" + "I" * 4, 1)]),
    SparsePauliOp.from_list([("I" * 4 + "Z" + "I" * 3, 1)])
]

# Define the QNN
# EstimatorQNN returns one expectation value per observable (a length-5
# output vector per sample); feature-map parameters are the inputs and
# ansatz parameters are the trainable weights.
qnn = EstimatorQNN(
    circuit=circuit.decompose(),
    observables=observables,
    input_params=feature_map.parameters,
    weight_params=ansatz.parameters,
)

objective_func_vals = []  # Initialize a list to store the objective function values
In [24]:
# Render the full (feature map + ansatz) circuit.
circuit.draw("mpl", style="clifford")
Out[24]:
In [37]:
from sklearn.preprocessing import MinMaxScaler
import numpy as np

scaler = MinMaxScaler()
# Flatten each image to a single feature row. Use the actual sample count
# instead of the previously hard-coded 574 so this cell survives any change
# in dataset size or split ratio.
train_images_flat = train_images.reshape(len(train_images), -1)
# Scale every flattened pixel column to [0, 1] independently.
train_images_normalized = scaler.fit_transform(train_images_flat)

# Keep only the first 8 features to match the 8-qubit ZFeatureMap.
# NOTE(review): these are just the first 8 pixel values of each image, not a
# learned or informative projection — consider PCA/pooling for real feature
# reduction.
train_images_quantum = train_images_normalized[:, :8]

# Sanity check: expect (n_train_samples, 8).
print(train_images_quantum.shape)
(574, 8)
In [38]:
def callback_graph(weights, obj_func_eval):
    """Optimizer callback: record the latest objective value and redraw the
    convergence curve in place.

    Args:
        weights: Current variational weights (unused; required by the
            callback signature).
        obj_func_eval: Objective function value at this iteration.
    """
    objective_func_vals.append(obj_func_eval)
    clear_output(wait=True)
    history = objective_func_vals
    plt.plot(range(len(history)), history)
    plt.title("Objective function value against iteration")
    plt.xlabel("Iteration")
    plt.ylabel("Objective function value")
    plt.show()
In [39]:
# Create the COBYLA optimizer
# Gradient-free optimizer; 100 iterations keeps training time manageable.
optimizer = COBYLA(maxiter=100)

# Initialize the classifier with one_hot=True
classifier = NeuralNetworkClassifier(
    qnn,
    optimizer=optimizer,
    one_hot=True,  # This tells the classifier to expect one-hot encoded labels
    callback=callback_graph,
)

# Train the classifier
# NOTE(review): train_labels are integer class indices, not one-hot vectors,
# while one_hot=True is set above — verify that qiskit-machine-learning
# encodes them internally; otherwise one-hot encode labels before fit().
classifier.fit(train_images_quantum,np.array(train_labels))
Out[39]:
<qiskit_machine_learning.algorithms.classifiers.neural_network_classifier.NeuralNetworkClassifier at 0x1c3040f7c90>
In [40]:
# Training-set accuracy (chance level for 5 balanced classes is 20%).
print(f"Accuracy from the train data : {np.round(100 * classifier.score(train_images_quantum,np.array(train_labels)), 2)}%")
Accuracy from the train data : 27.35%